bitkeeper revision 1.1236.43.4 (424334begQSfXEFH2X7nIwb4TohPsA)
author kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 24 Mar 2005 21:44:30 +0000 (21:44 +0000)
committer kaf24@firebug.cl.cam.ac.uk <kaf24@firebug.cl.cam.ac.uk>
Thu, 24 Mar 2005 21:44:30 +0000 (21:44 +0000)
Fix TLB flushing on page type changes for SMP guests.
Signed-off-by: Keir Fraser <keir@xensource.com>
xen/arch/x86/mm.c
xen/common/page_alloc.c
xen/include/asm-x86/flushtlb.h

index bd427ca0c5742cd83c86b1409c01c14154164259..ee661bc1861ca0e1769db78ab4347cda53ad1554 100644 (file)
@@ -1147,13 +1147,16 @@ int get_page_type(struct pfn_info *page, u32 type)
                  * may be unnecessary (e.g., page was GDT/LDT) but those
                  * circumstances should be very rare.
                  */
-                struct domain *d = page_get_owner(page);
-                if ( unlikely(NEED_FLUSH(tlbflush_time[d->exec_domain[0]->
-                                                      processor],
-                                         page->tlbflush_timestamp)) )
+                struct exec_domain *ed;
+                unsigned long mask = 0;
+                for_each_exec_domain ( page_get_owner(page), ed )
+                    mask |= 1 << ed->processor;
+                mask = tlbflush_filter_cpuset(mask, page->tlbflush_timestamp);
+
+                if ( unlikely(mask != 0) )
                 {
-                    perfc_incr(need_flush_tlb_flush);
-                    flush_tlb_cpu(d->exec_domain[0]->processor);
+                    perfc_incrc(need_flush_tlb_flush);
+                    flush_tlb_mask(mask);
                 }
 
                 /* We lose existing type, back pointer, and validity. */
index 026217c5afcaa1f85019517e0e6ccb54a0b2b869..4cae1d2a6373f5dd6634dc495146d625b3751d2b 100644 (file)
@@ -470,43 +470,30 @@ void init_domheap_pages(unsigned long ps, unsigned long pe)
 struct pfn_info *alloc_domheap_pages(struct domain *d, unsigned int order)
 {
     struct pfn_info *pg;
-    unsigned long mask, flushed_mask, pfn_stamp, cpu_stamp;
-    int i, j;
+    unsigned long mask = 0;
+    int i;
 
     ASSERT(!in_irq());
 
     if ( unlikely((pg = alloc_heap_pages(MEMZONE_DOM, order)) == NULL) )
         return NULL;
 
-    flushed_mask = 0;
     for ( i = 0; i < (1 << order); i++ )
     {
-        if ( (mask = (pg[i].u.free.cpu_mask & ~flushed_mask)) != 0 )
-        {
-            pfn_stamp = pg[i].tlbflush_timestamp;
-            for ( j = 0; (mask != 0) && (j < smp_num_cpus); j++ )
-            {
-                if ( mask & (1UL<<j) )
-                {
-                    cpu_stamp = tlbflush_time[j];
-                    if ( !NEED_FLUSH(cpu_stamp, pfn_stamp) )
-                        mask &= ~(1UL<<j);
-                }
-            }
-            
-            if ( unlikely(mask != 0) )
-            {
-                flush_tlb_mask(mask);
-                perfc_incrc(need_flush_tlb_flush);
-                flushed_mask |= mask;
-            }
-        }
+        mask |= tlbflush_filter_cpuset(
+            pg[i].u.free.cpu_mask & ~mask, pg[i].tlbflush_timestamp);
 
         pg[i].count_info        = 0;
         pg[i].u.inuse._domain   = 0;
         pg[i].u.inuse.type_info = 0;
     }
 
+    if ( unlikely(mask != 0) )
+    {
+        perfc_incrc(need_flush_tlb_flush);
+        flush_tlb_mask(mask);
+    }
+
     if ( d == NULL )
         return pg;
 
@@ -570,7 +557,7 @@ void free_domheap_pages(struct pfn_info *pg, unsigned int order)
         /* NB. May recursively lock from domain_relinquish_memory(). */
         spin_lock_recursive(&d->page_alloc_lock);
 
-        for_each_exec_domain(d, ed)
+        for_each_exec_domain ( d, ed )
             cpu_mask |= 1 << ed->processor;
 
         for ( i = 0; i < (1 << order); i++ )
index cebb78ebb5d742010fdbdaa5933bf7395b602615..5958b5f524eb833f1aa15667ad98c1b83b7b0a43 100644 (file)
@@ -43,6 +43,26 @@ static inline int NEED_FLUSH(u32 cpu_stamp, u32 lastuse_stamp)
              (lastuse_stamp <= curr_time)));
 }
 
+/*
+ * Filter the given set of CPUs, returning only those that may not have
+ * flushed their TLBs since @page_timestamp.
+ */
+static inline unsigned long tlbflush_filter_cpuset(
+    unsigned long cpuset, u32 page_timestamp)
+{
+    int i;
+    unsigned long remain;
+
+    for ( i = 0, remain = ~0UL; (cpuset & remain) != 0; i++, remain <<= 1 )
+    {
+        if ( (cpuset & (1UL << i)) &&
+             !NEED_FLUSH(tlbflush_time[i], page_timestamp) )
+            cpuset &= ~(1UL << i);
+    }
+
+    return cpuset;
+}
+
 extern void new_tlbflush_clock_period(void);
 
 /* Read pagetable base. */